412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/gnttab.c
40f56239sFcjHiIRmnObRIDF-zaeKQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/process.c
40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.8.1-xen-sparse/arch/xen/kernel/reboot.c
+414c113396tK1HTVeUalm3u-1DF16g linux-2.6.8.1-xen-sparse/arch/xen/kernel/skbuff.c
3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.8.1-xen-sparse/arch/xen/kernel/xen_proc.c
41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.8.1-xen-sparse/drivers/Makefile
4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.8.1-xen-sparse/drivers/char/mem.c
comment 'Xen'
bool 'Support for privileged operations (domain 0)' CONFIG_XEN_PRIVILEGED_GUEST
bool 'Device-driver domain (physical device access)' CONFIG_XEN_PHYSDEV_ACCESS
+bool 'Scrub memory before freeing it to Xen' CONFIG_XEN_SCRUB_PAGES
endmenu
# The IBM S/390 patch needs this.
define_bool CONFIG_NO_IDLE_HZ y
#
CONFIG_XEN_PRIVILEGED_GUEST=y
CONFIG_XEN_PHYSDEV_ACCESS=y
+CONFIG_XEN_SCRUB_PAGES=y
CONFIG_NO_IDLE_HZ=y
CONFIG_FOREIGN_PAGES=y
#
# CONFIG_XEN_PRIVILEGED_GUEST is not set
# CONFIG_XEN_PHYSDEV_ACCESS is not set
+CONFIG_XEN_SCRUB_PAGES=y
CONFIG_NO_IDLE_HZ=y
# CONFIG_FOREIGN_PAGES is not set
CONFIG_NETDEVICES=y
{
unsigned long mfn = phys_to_machine_mapping[*currp];
curraddr = (unsigned long)page_address(mem_map + *currp);
+ /* Blow away page contents for security, and also p.t. ref if any. */
if ( curraddr != 0 )
+ {
+ scrub_pages(curraddr, 1);
queue_l1_entry_update(get_ptep(curraddr), 0);
+ }
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ else
+ {
+ void *p = kmap(&mem_map[*currp]);
+ scrub_pages(p, 1);
+ kunmap(&mem_map[*currp]);
+ }
+#endif
phys_to_machine_mapping[*currp] = DEAD;
*currp = mfn;
}
}
len = strnlen_user(buffer, count);
- if (len==0) return -EBADMSG;
- if (len==1) return 1; /* input starts with a NUL char */
- if ( strncpy_from_user(memstring, buffer, len) < 0)
+ if ( len == 0 ) return -EBADMSG;
+ if ( len == 1 ) return 1; /* input starts with a NUL char */
+ if ( strncpy_from_user(memstring, buffer, len) < 0 )
return -EFAULT;
endchar = memstring;
O_TARGET := kernel.o
-export-objs := i386_ksyms.o gnttab.o
+export-objs := i386_ksyms.o gnttab.o skbuff.o
obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
ptrace.o ioport.o ldt.o setup.o time.o sys_i386.o \
i386_ksyms.o i387.o evtchn.o ctrl_if.o pci-dma.o \
- reboot.o fixup.o gnttab.o
+ reboot.o fixup.o gnttab.o skbuff.o
ifdef CONFIG_PCI
obj-y += pci-i386.o pci-pc.o
#ifndef __ASSEMBLY__
#include <linux/config.h>
+#include <linux/string.h>
#include <asm/types.h>
#include <asm/hypervisor-ifs/hypervisor-if.h>
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
*
* %NULL is returned if there is no free memory.
*/
-
+#ifndef CONFIG_XEN
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
- struct sk_buff *skb;
-#if defined(CONFIG_XEN)
- length = (PAGE_SIZE/2)+1; /* force slab allocater to give us a page */
-#endif
- skb = alloc_skb(length+16, gfp_mask);
+ struct sk_buff *skb = alloc_skb(length+16, gfp_mask);
if (skb)
skb_reserve(skb,16);
return skb;
}
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
/**
* dev_alloc_skb - allocate an skbuff for sending
ln -sf ../../../${LINUX_26}/arch/xen/kernel/fixup.c
ln -sf ../../../${LINUX_26}/arch/xen/kernel/gnttab.c
ln -sf ../../../${LINUX_26}/arch/xen/kernel/reboot.c
+ln -sf ../../../${LINUX_26}/arch/xen/kernel/skbuff.c
ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/ioport.c
ln -sf ../../../${LINUX_26}/arch/xen/i386/kernel/pci-dma.c
help
Use writable L1 pagetables
+config XEN_SCRUB_PAGES
+ bool "Scrub memory before freeing it to Xen"
+ default y
+ help
+ Erase memory contents before freeing it back to Xen's global
+ pool. This ensures that any secrets contained within that
+ memory (e.g., private keys) cannot be found by other guests that
+ may be running on the machine. Most people will want to say Y here.
+ If security is not a concern then you may increase performance by
+ saying N.
+
endmenu
config FOREIGN_PAGES
bool
default y
-config PAGESIZED_SKBS
+config HAVE_ARCH_DEV_ALLOC_SKB
bool
- default y if XEN_NETDEV_BACKEND
- default n if !XEN_NETDEV_BACKEND
+ default y
#config VT
# bool
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
CONFIG_FOREIGN_PAGES=y
-CONFIG_PAGESIZED_SKBS=y
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_X86=y
# CONFIG_X86_64 is not set
CONFIG_XEN_NETDEV_FRONTEND=y
# CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
CONFIG_XEN_WRITABLE_PAGETABLES=y
+CONFIG_XEN_SCRUB_PAGES=y
CONFIG_FOREIGN_PAGES=y
-# CONFIG_PAGESIZED_SKBS is not set
+CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
CONFIG_X86=y
# CONFIG_X86_64 is not set
pmd_t *pmd;
pte_t *pte;
unsigned long pfn, i;
+ scrub_pages(vstart, 1 << order);
/* 1. Zap current PTEs, giving away the underlying pages. */
for (i = 0; i < (1<<order); i++) {
pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
if ( vstart == 0 )
return 0UL;
+ scrub_pages(vstart, 1 << order);
+
pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
if ( pfn_array == NULL )
BUG();
extra-y += vmlinux.lds.s
obj-y := ctrl_if.o evtchn.o fixup.o process.o reboot.o xen_proc.o empty.o \
- gnttab.o
+ gnttab.o skbuff.o
--- /dev/null
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(__dev_alloc_skb);
+
+static kmem_cache_t *skbuff_cachep;
+
+/*
+ * Allocate an skbuff whose data area is a single, page-aligned page from
+ * our private slab cache. NOTE(review): presumably required so the Xen
+ * network page-flipping path can exchange the buffer a whole page at a
+ * time — confirm against the netfront/netback receive path.
+ */
+struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
+{
+ struct sk_buff *skb;
+ u8 *new_data, *new_shinfo;
+
+ /*
+ * Yuk! There is no way to get a skbuff head without allocating the
+ * data area using kmalloc(). So we do that and then replace the default
+ * data area with our own.
+ */
+ skb = alloc_skb(0, gfp_mask);
+ if ( unlikely(skb == NULL) )
+ return NULL;
+
+ /* Page-sized, page-aligned replacement data area from our cache. */
+ new_data = kmem_cache_alloc(skbuff_cachep, gfp_mask);
+ if ( new_data == NULL )
+ {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ /* shinfo sits at the very end of the data area, as alloc_skb() arranges;
+ * copy the freshly-initialised shinfo from the temporary buffer. */
+ new_shinfo =
+ new_data + PAGE_SIZE - sizeof(struct skb_shared_info);
+ memcpy(new_shinfo, skb_shinfo(skb), sizeof(struct skb_shared_info));
+
+ /* Discard the kmalloc()ed data area alloc_skb() gave us. */
+ kfree(skb->head);
+
+ skb->head = new_data;
+ skb->data = skb->tail = new_data + 16; /* __dev_alloc_skb does this */
+ skb->end = new_shinfo;
+ skb->truesize = 1500; /* NOTE(review): understates real usage (a full
+ * page); confirm whether accounting should
+ * use PAGE_SIZE instead. */
+
+ return skb;
+}
+
+/*
+ * Slab constructor: scrub (zero) each object when its backing page enters
+ * the cache, per CONFIG_XEN_SCRUB_PAGES. NOTE(review): objects are scrubbed
+ * on construction, not on every free — presumably adequate because pages
+ * are also scrubbed before being returned to Xen; confirm.
+ */
+static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
+{
+ scrub_pages(buf, 1);
+}
+
+/*
+ * Create the page-sized, page-aligned slab cache backing __dev_alloc_skb().
+ * The cache is essential to network operation, so failure to create it is
+ * fatal: without this check a failed kmem_cache_create() would leave
+ * skbuff_cachep NULL and the first __dev_alloc_skb() call would oops.
+ * (This mirrors net/core/skbuff.c, which panics if skbuff_head_cache
+ * cannot be created.)
+ */
+static int __init skbuff_init(void)
+{
+ skbuff_cachep = kmem_cache_create(
+ "xen-skb", PAGE_SIZE, PAGE_SIZE, 0, skbuff_ctor, NULL);
+ if ( skbuff_cachep == NULL )
+ panic("Cannot create xen-skb slab cache\n");
+ return 0;
+}
+__initcall(skbuff_init);
(((unsigned long)skb->end ^ (unsigned long)skb->head) & PAGE_MASK) ||
((skb->end - skb->head) < (PAGE_SIZE/2)) )
{
- struct sk_buff *nskb = alloc_skb(PAGE_SIZE-1024, GFP_ATOMIC);
+ struct sk_buff *nskb = dev_alloc_skb(PAGE_SIZE);
int hlen = skb->data - skb->head;
if ( unlikely(nskb == NULL) )
goto drop;
#ifndef __GFP_NOWARN
#define __GFP_NOWARN 0
#endif
+#define alloc_skb_page() __dev_alloc_skb(PAGE_SIZE, GFP_ATOMIC|__GFP_NOWARN)
/*
* If the backend driver is pipelining transmit requests then we can be very
* @param dev device
* @return 0 on success, error code otherwise
*/
-static int vif_wake(struct net_device *dev){
- int err = 0;
+static int vif_wake(struct net_device *dev)
+{
struct sk_buff *skb;
- u32 src_ip;
- u32 dst_ip = INADDR_BROADCAST;
- unsigned char dst_hw[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u32 src_ip, dst_ip;
+ unsigned char dst_hw[ETH_ALEN];
+ memset(dst_hw, 0xff, ETH_ALEN);
+
+ dst_ip = INADDR_BROADCAST;
src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
skb = arp_create(ARPOP_REQUEST, ETH_P_ARP,
dst_ip, dev, src_ip,
dst_hw, dev->dev_addr, NULL);
- if(skb == NULL){
- err = -ENOMEM;
- goto exit;
- }
- err = dev_queue_xmit(skb);
- exit:
- return err;
-}
+ if ( skb == NULL )
+ return -ENOMEM;
-static inline struct sk_buff *alloc_skb_page(void)
-{
- struct sk_buff *skb;
- skb = __dev_alloc_skb((PAGE_SIZE/2)+1, GFP_ATOMIC|__GFP_NOWARN);
-#if 0
- if ( skb && unlikely(((unsigned long)skb->head & (PAGE_SIZE-1)) != 0) )
- panic("alloc_skb needs to provide us page-aligned buffers.");
-#endif
- return skb;
+ return dev_queue_xmit(skb);
}
static int network_open(struct net_device *dev)
#ifndef __ASSEMBLY__
#include <linux/config.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <asm/hypervisor-ifs/hypervisor-if.h>
+#ifdef CONFIG_XEN_SCRUB_PAGES
+#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+#else
+#define scrub_pages(_p,_n) ((void)0)
+#endif
+
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
*
*
* %NULL is returned if there is no free memory.
+#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
- struct sk_buff *skb;
-#ifdef CONFIG_PAGESIZED_SKBS
- length = max(length, (unsigned int)(PAGE_SIZE - 16));
-#endif
- skb = alloc_skb(length + 16, gfp_mask);
+ struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
if (likely(skb))
skb_reserve(skb, 16);
return skb;
}
+#else
+extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
+#endif
/**
* dev_alloc_skb - allocate an skbuff for sending
#include <xen/spinlock.h>
#include <xen/slab.h>
#include <xen/irq.h>
+#include <asm/domain_page.h>
extern char opt_badpage[];
{
int i, drop_dom_ref;
struct domain *d = pg->u.inuse.domain;
+ void *p;
if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
{
for ( i = 0; i < (1 << order); i++ )
{
-#ifndef NDEBUG
- if ( pg[i].u.inuse.type_info & PGT_count_mask )
- printk("ERROR: type count not zero on free %x\n",
- pg[i].u.inuse.type_info );
-#endif
+ ASSERT((pg[i].u.inuse.type_info & PGT_count_mask) == 0);
pg[i].tlbflush_timestamp = tlbflush_clock;
pg[i].u.free.cpu_mask = 1 << d->processor;
list_del(&pg[i].list);
+
+ /*
+ * Normally we expect a domain to clear pages before freeing them,
+ * if it cares about the secrecy of their contents. However, after
+ * a domain has died we assume responsibility for erasure.
+ */
+ if ( unlikely(test_bit(DF_DYING, &d->flags)) )
+ {
+ p = map_domain_mem(page_to_phys(&pg[i]));
+ clear_page(p);
+ unmap_domain_mem(p);
+ }
}
d->tot_pages -= 1 << order;